import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Outer class: sample the radius-10 circle equation on x in [-5, 5].
# Only an arc results, since the x-range does not span the full diameter
# (intentional — it keeps the two classes concentric but separable).
x = np.linspace(-5.0, 5.0, 100)
y = np.sqrt(100.0 - x ** 2)
x.shape
(100,)
y.shape  # expect (100,): one y value per x sample
(100,)
# Mirror both coordinate arrays through the origin so the lower half
# of the arc is present as well (100 -> 200 points).
y = np.concatenate([y, -y])
x = np.concatenate([x, -x])
x.shape
(200,)
y.shape  # now (200,) after mirroring
(200,)
# Inner class: radius-5 circle sampled on the same x grid, then mirrored
# through the origin to produce both halves (200 points total).
x1 = np.linspace(-5.0, 5.0, 100)
y1 = np.sqrt(25.0 - x1 ** 2)
y1 = np.concatenate([y1, -y1])
x1 = np.concatenate([x1, -x1])
y1.shape
(200,)
x1.shape  # (200,) — matches y1 after mirroring
(200,)
# Plot both point sets (NOTE(review): arguments are (y, x), so the y values
# land on the horizontal axis — harmless for circular data, but unusual)
plt.scatter(y,x)
plt.scatter(y1,x1)
<matplotlib.collections.PathCollection at 0x25935ba63d0>
# Create a DataFrame for the outer arc points, labelled 0
df1 = pd.DataFrame(np.vstack([y, x]).T, columns=['X1', 'X2'])
df1['Y'] = 0
# Create a DataFrame for the inner ring points, labelled 1
df2 = pd.DataFrame(np.vstack([y1, x1]).T, columns=['X1', 'X2'])
df2['Y'] = 1
# Stack the two class frames into one dataset.
# DataFrame.append is deprecated (FutureWarning) and removed in pandas 2.x;
# pd.concat is the supported replacement and, like append's default,
# keeps the original (duplicated 0..199) row indices.
df = pd.concat([df1, df2])
df.head()
C:\Users\dines\AppData\Local\Temp\ipykernel_13024\1397825995.py:2: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead. df = df1.append(df2)
| X1 | X2 | Y | |
|---|---|---|---|
| 0 | 8.660254 | -5.00000 | 0 |
| 1 | 8.717792 | -4.89899 | 0 |
| 2 | 8.773790 | -4.79798 | 0 |
| 3 | 8.828277 | -4.69697 | 0 |
| 4 | 8.881281 | -4.59596 | 0 |
# Features are the two coordinate columns; target is the class label.
x = df[['X1', 'X2']]
y = df['Y']
x.head()
| X1 | X2 | |
|---|---|---|
| 0 | 8.660254 | -5.00000 |
| 1 | 8.717792 | -4.89899 |
| 2 | 8.773790 | -4.79798 |
| 3 | 8.828277 | -4.69697 |
| 4 | 8.881281 | -4.59596 |
y  # labels: 0 for the outer-arc rows, 1 for the inner-ring rows (400 total)
0 0
1 0
2 0
3 0
4 0
..
195 1
196 1
197 1
198 1
199 1
Name: Y, Length: 400, dtype: int64
#Split the dataset into train_test
from sklearn.model_selection import train_test_split
# 75/25 split; fixed random_state so the split (and accuracy) is reproducible
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.25,random_state = 0)
x_train.shape
(300, 2)
y_train.shape  # (300,) — matches x_train rows
(300,)
x_test.shape  # (100, 2)
(100, 2)
x_test.shape  # NOTE(review): duplicates the previous cell — probably meant y_test.shape
(100, 2)
from sklearn.svm import SVC
# RBF kernel can separate the concentric rings without manual feature lifting
cls = SVC(kernel = "rbf")
cls.fit(x_train,y_train)
SVC()In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
SVC()
#Evaluation Metrics
from sklearn.metrics import accuracy_score
y_pred = cls.predict(x_test)
accuracy_score(y_test,y_pred)  # fraction of correctly classified test rows
1.0
df.head()  # re-inspect the frame before adding polynomial features
| X1 | X2 | Y | |
|---|---|---|---|
| 0 | 8.660254 | -5.00000 | 0 |
| 1 | 8.717792 | -4.89899 | 0 |
| 2 | 8.773790 | -4.79798 | 0 |
| 3 | 8.828277 | -4.69697 | 0 |
| 4 | 8.881281 | -4.59596 | 0 |
# Hand-crafted polynomial-kernel feature lift:
# (X1, X2) -> (X1, X2, X1^2, X2^2, X1*X2)
df['X1_Square'] = df['X1'].pow(2)
df['X2_Square'] = df['X2'].pow(2)
df['X1*X2'] = df['X1'].mul(df['X2'])
df.head()
| X1 | X2 | Y | X1_Square | X2_Square | X1*X2 | |
|---|---|---|---|---|---|---|
| 0 | 8.660254 | -5.00000 | 0 | 75.000000 | 25.000000 | -43.301270 |
| 1 | 8.717792 | -4.89899 | 0 | 75.999898 | 24.000102 | -42.708375 |
| 2 | 8.773790 | -4.79798 | 0 | 76.979390 | 23.020610 | -42.096467 |
| 3 | 8.828277 | -4.69697 | 0 | 77.938476 | 22.061524 | -41.466150 |
| 4 | 8.881281 | -4.59596 | 0 | 78.877155 | 21.122845 | -40.818009 |
df['X1'].shape  # 400 rows: 200 per class
(400,)
# Independent features: raw coordinates plus the lifted polynomial terms;
# dependent feature: the class label.
feature_cols = ['X1', 'X2', 'X1_Square', 'X2_Square', 'X1*X2']
x = df[feature_cols]
y = df['Y']
y
0 0
1 0
2 0
3 0
4 0
..
195 1
196 1
197 1
198 1
199 1
Name: Y, Length: 400, dtype: int64
#train_test_split
# Same split parameters as before, now on the 5-dimensional lifted feature set
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size = 0.25,random_state = 0)
x_train.shape
(300, 5)
y_train.shape  # (300,)
(300,)
# Visualise the lifted features with plotly (interactive 3-D scatter)
import plotly.express as px

# Raw coordinates against the cross term, coloured by class
fig = px.scatter_3d(df, x="X1", y="X2", z="X1*X2", color='Y')
fig.show()

# BUG FIX: the original passed X1_Square for BOTH the x and y axes,
# collapsing the plot onto a plane; the intent is X1_Square vs X2_Square.
fig = px.scatter_3d(df, x="X1_Square", y="X2_Square", z="X1*X2", color='Y')
fig.show()
# With the polynomial features added, a plain linear SVM separates the
# classes — the kernel trick performed by hand in feature space.
classifier = SVC(kernel="linear")
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)
accuracy_score(y_test, y_pred)  # 1.0: lifted data is linearly separable
1.0